In [1]:
import sys
!{sys.executable} -m pip install pandas
!{sys.executable} -m pip install tensorflow
!{sys.executable} -m pip install sklearn
!{sys.executable} -m pip install seaborn
!{sys.executable} -m pip install numpy
!{sys.executable} -m pip install matplotlib
!{sys.executable} -m pip install chart_studio
!{sys.executable} -m pip install plotly
!{sys.executable} -m pip install tensorflow_addons

import os
import tensorflow as tf
tf.__version__
Requirement already satisfied: pandas in /usr/local/lib/python3.9/site-packages (1.2.4)
Requirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.9/site-packages (from pandas) (2021.1)
Requirement already satisfied: numpy>=1.16.5 in /usr/local/lib/python3.9/site-packages (from pandas) (1.19.5)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.9/site-packages (from pandas) (2.8.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/site-packages (from python-dateutil>=2.7.3->pandas) (1.15.0)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Requirement already satisfied: tensorflow in /usr/local/lib/python3.9/site-packages (2.5.0)
Requirement already satisfied: typing-extensions~=3.7.4 in /usr/local/lib/python3.9/site-packages (from tensorflow) (3.7.4.3)
Requirement already satisfied: tensorflow-estimator<2.6.0,>=2.5.0rc0 in /usr/local/lib/python3.9/site-packages (from tensorflow) (2.5.0)
Requirement already satisfied: opt-einsum~=3.3.0 in /usr/local/lib/python3.9/site-packages (from tensorflow) (3.3.0)
Requirement already satisfied: absl-py~=0.10 in /usr/local/lib/python3.9/site-packages (from tensorflow) (0.12.0)
Requirement already satisfied: termcolor~=1.1.0 in /usr/local/lib/python3.9/site-packages (from tensorflow) (1.1.0)
Requirement already satisfied: google-pasta~=0.2 in /usr/local/lib/python3.9/site-packages (from tensorflow) (0.2.0)
Requirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.9/site-packages (from tensorflow) (3.17.1)
Requirement already satisfied: keras-preprocessing~=1.1.2 in /usr/local/lib/python3.9/site-packages (from tensorflow) (1.1.2)
Requirement already satisfied: h5py~=3.1.0 in /usr/local/lib/python3.9/site-packages (from tensorflow) (3.1.0)
Requirement already satisfied: tensorboard~=2.5 in /usr/local/lib/python3.9/site-packages (from tensorflow) (2.5.0)
Requirement already satisfied: wrapt~=1.12.1 in /usr/local/lib/python3.9/site-packages (from tensorflow) (1.12.1)
Requirement already satisfied: keras-nightly~=2.5.0.dev in /usr/local/lib/python3.9/site-packages (from tensorflow) (2.5.0.dev2021032900)
Requirement already satisfied: wheel~=0.35 in /usr/local/lib/python3.9/site-packages (from tensorflow) (0.36.2)
Requirement already satisfied: numpy~=1.19.2 in /usr/local/lib/python3.9/site-packages (from tensorflow) (1.19.5)
Requirement already satisfied: six~=1.15.0 in /usr/local/lib/python3.9/site-packages (from tensorflow) (1.15.0)
Requirement already satisfied: gast==0.4.0 in /usr/local/lib/python3.9/site-packages (from tensorflow) (0.4.0)
Requirement already satisfied: flatbuffers~=1.12.0 in /usr/local/lib/python3.9/site-packages (from tensorflow) (1.12)
Requirement already satisfied: astunparse~=1.6.3 in /usr/local/lib/python3.9/site-packages (from tensorflow) (1.6.3)
Requirement already satisfied: grpcio~=1.34.0 in /usr/local/lib/python3.9/site-packages (from tensorflow) (1.34.1)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.9/site-packages (from tensorboard~=2.5->tensorflow) (2.25.1)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.9/site-packages (from tensorboard~=2.5->tensorflow) (0.4.4)
Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.9/site-packages (from tensorboard~=2.5->tensorflow) (1.30.1)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.9/site-packages (from tensorboard~=2.5->tensorflow) (3.3.4)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.9/site-packages (from tensorboard~=2.5->tensorflow) (1.8.0)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.9/site-packages (from tensorboard~=2.5->tensorflow) (56.0.0)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.9/site-packages (from tensorboard~=2.5->tensorflow) (2.0.1)
Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.9/site-packages (from tensorboard~=2.5->tensorflow) (0.6.1)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.9/site-packages (from google-auth<2,>=1.6.3->tensorboard~=2.5->tensorflow) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.9/site-packages (from google-auth<2,>=1.6.3->tensorboard~=2.5->tensorflow) (4.2.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.9/site-packages (from google-auth<2,>=1.6.3->tensorboard~=2.5->tensorflow) (0.2.8)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.9/site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.5->tensorflow) (1.3.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.9/site-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard~=2.5->tensorflow) (0.4.8)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorboard~=2.5->tensorflow) (2.10)
Requirement already satisfied: chardet<5,>=3.0.2 in /usr/local/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorboard~=2.5->tensorflow) (4.0.0)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorboard~=2.5->tensorflow) (1.26.5)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorboard~=2.5->tensorflow) (2020.12.5)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.9/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.5->tensorflow) (3.1.0)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Requirement already satisfied: sklearn in /usr/local/lib/python3.9/site-packages (0.0)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.9/site-packages (from sklearn) (0.24.2)
Requirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.9/site-packages (from scikit-learn->sklearn) (1.19.5)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.9/site-packages (from scikit-learn->sklearn) (1.0.1)
Requirement already satisfied: scipy>=0.19.1 in /usr/local/lib/python3.9/site-packages (from scikit-learn->sklearn) (1.6.3)
Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.9/site-packages (from scikit-learn->sklearn) (2.1.0)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Requirement already satisfied: seaborn in /usr/local/lib/python3.9/site-packages (0.11.1)
Requirement already satisfied: matplotlib>=2.2 in /usr/local/lib/python3.9/site-packages (from seaborn) (3.4.2)
Requirement already satisfied: numpy>=1.15 in /usr/local/lib/python3.9/site-packages (from seaborn) (1.19.5)
Requirement already satisfied: pandas>=0.23 in /usr/local/lib/python3.9/site-packages (from seaborn) (1.2.4)
Requirement already satisfied: scipy>=1.0 in /usr/local/lib/python3.9/site-packages (from seaborn) (1.6.3)
Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/site-packages (from matplotlib>=2.2->seaborn) (8.2.0)
Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/site-packages (from matplotlib>=2.2->seaborn) (2.8.1)
Requirement already satisfied: pyparsing>=2.2.1 in /usr/local/lib/python3.9/site-packages (from matplotlib>=2.2->seaborn) (2.4.7)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/site-packages (from matplotlib>=2.2->seaborn) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/site-packages (from matplotlib>=2.2->seaborn) (1.3.1)
Requirement already satisfied: six in /usr/local/lib/python3.9/site-packages (from cycler>=0.10->matplotlib>=2.2->seaborn) (1.15.0)
Requirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.9/site-packages (from pandas>=0.23->seaborn) (2021.1)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Requirement already satisfied: numpy in /usr/local/lib/python3.9/site-packages (1.19.5)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Requirement already satisfied: matplotlib in /usr/local/lib/python3.9/site-packages (3.4.2)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.9/site-packages (from matplotlib) (0.10.0)
Requirement already satisfied: pyparsing>=2.2.1 in /usr/local/lib/python3.9/site-packages (from matplotlib) (2.4.7)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.9/site-packages (from matplotlib) (1.3.1)
Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.9/site-packages (from matplotlib) (2.8.1)
Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.9/site-packages (from matplotlib) (8.2.0)
Requirement already satisfied: numpy>=1.16 in /usr/local/lib/python3.9/site-packages (from matplotlib) (1.19.5)
Requirement already satisfied: six in /usr/local/lib/python3.9/site-packages (from cycler>=0.10->matplotlib) (1.15.0)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Requirement already satisfied: chart_studio in /usr/local/lib/python3.9/site-packages (1.1.0)
Requirement already satisfied: requests in /usr/local/lib/python3.9/site-packages (from chart_studio) (2.25.1)
Requirement already satisfied: retrying>=1.3.3 in /usr/local/lib/python3.9/site-packages (from chart_studio) (1.3.3)
Requirement already satisfied: plotly in /usr/local/lib/python3.9/site-packages (from chart_studio) (4.14.3)
Requirement already satisfied: six in /usr/local/lib/python3.9/site-packages (from chart_studio) (1.15.0)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/site-packages (from requests->chart_studio) (2020.12.5)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/site-packages (from requests->chart_studio) (1.26.5)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.9/site-packages (from requests->chart_studio) (2.10)
Requirement already satisfied: chardet<5,>=3.0.2 in /usr/local/lib/python3.9/site-packages (from requests->chart_studio) (4.0.0)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Requirement already satisfied: plotly in /usr/local/lib/python3.9/site-packages (4.14.3)
Requirement already satisfied: retrying>=1.3.3 in /usr/local/lib/python3.9/site-packages (from plotly) (1.3.3)
Requirement already satisfied: six in /usr/local/lib/python3.9/site-packages (from plotly) (1.15.0)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Requirement already satisfied: tensorflow_addons in /usr/local/lib/python3.9/site-packages (0.13.0)
Requirement already satisfied: typeguard>=2.7 in /usr/local/lib/python3.9/site-packages (from tensorflow_addons) (2.12.0)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Out[1]:
'2.5.0'
In [2]:
import pandas as pd
from sklearn.cluster import KMeans
from sklearn import metrics
import re
from sklearn.impute import SimpleImputer
from numpy import random
import seaborn as sb
In [3]:
data = pd.read_csv(r'77_cancer_proteomes_CPTAC_itraq.csv',header=0,index_col=None)
clinical = pd.read_csv(r'clinical_data_breast_cancer.csv',header=0,index_col=None)
pam50 = pd.read_csv(r'PAM50_proteins.csv',header=0)
In [4]:
data.head()
Out[4]:
RefSeq_accession_number gene_symbol gene_name AO-A12D.01TCGA C8-A131.01TCGA AO-A12B.01TCGA BH-A18Q.02TCGA C8-A130.02TCGA C8-A138.03TCGA E2-A154.03TCGA ... AO-A12B.34TCGA A2-A0SW.35TCGA AO-A0JL.35TCGA BH-A0BV.35TCGA A2-A0YM.36TCGA BH-A0C7.36TCGA A2-A0SX.36TCGA 263d3f-I.CPTAC blcdb9-I.CPTAC c4155b-C.CPTAC
0 NP_958782 PLEC plectin isoform 1 1.096131 2.609943 -0.659828 0.195341 -0.494060 2.765081 0.862659 ... -0.963904 -0.487772 -0.10668 -0.065838 0.655850 -0.552212 -0.398560 0.598585 -0.191285 0.566975
1 NP_958785 NaN plectin isoform 1g 1.111370 2.650422 -0.648742 0.215413 -0.503899 2.779709 0.870186 ... -0.938210 -0.487772 -0.10668 -0.055893 0.658143 -0.547749 -0.392601 0.606697 -0.183918 0.578702
2 NP_958786 PLEC plectin isoform 1a 1.111370 2.650422 -0.654285 0.215413 -0.500619 2.779709 0.870186 ... -0.943919 -0.487772 -0.10668 -0.065838 0.655850 -0.552212 -0.392601 0.603993 -0.186022 0.576747
3 NP_000436 NaN plectin isoform 1c 1.107561 2.646374 -0.632113 0.205377 -0.510459 2.797995 0.866423 ... -0.935355 -0.487772 -0.10668 -0.055893 0.655850 -0.552212 -0.392601 0.603993 -0.186022 0.576747
4 NP_958781 NaN plectin isoform 1e 1.115180 2.646374 -0.640428 0.215413 -0.503899 2.787023 0.870186 ... -0.935355 -0.503853 -0.10668 -0.062523 0.651264 -0.556675 -0.395581 0.603993 -0.167079 0.576747

5 rows × 86 columns

In [5]:
clinical.head()
Out[5]:
Complete TCGA ID Gender Age at Initial Pathologic Diagnosis ER Status PR Status HER2 Final Status Tumor Tumor--T1 Coded Node Node-Coded ... PAM50 mRNA SigClust Unsupervised mRNA SigClust Intrinsic mRNA miRNA Clusters methylation Clusters RPPA Clusters CN Clusters Integrated Clusters (with PAM50) Integrated Clusters (no exp) Integrated Clusters (unsup exp)
0 TCGA-A2-A0T2 FEMALE 66 Negative Negative Negative T3 T_Other N3 Positive ... Basal-like 0 -13 3 5 Basal 3 2 2 2
1 TCGA-A2-A0CM FEMALE 40 Negative Negative Negative T2 T_Other N0 Negative ... Basal-like -12 -13 4 4 Basal 4 2 1 1
2 TCGA-BH-A18V FEMALE 48 Negative Negative Negative T2 T_Other N1 Positive ... Basal-like -12 -13 5 5 Basal 1 2 2 2
3 TCGA-BH-A18Q FEMALE 56 Negative Negative Negative T2 T_Other N1 Positive ... Basal-like -12 -13 5 5 Basal 1 2 2 2
4 TCGA-BH-A0E0 FEMALE 38 Negative Negative Negative T3 T_Other N3 Positive ... Basal-like 0 -13 5 5 Basal 1 2 2 2

5 rows × 30 columns

In [6]:
pam50.head()
Out[6]:
GeneSymbol RefSeqProteinID Species Gene Name
0 MIA NP_006524 Homo sapiens melanoma inhibitory activity
1 FGFR4 NP_002002 Homo sapiens fibroblast growth factor receptor 4
2 FGFR4 NP_998812 Homo sapiens fibroblast growth factor receptor 4
3 FGFR4 NP_075252 Homo sapiens fibroblast growth factor receptor 4
4 GPR160 NP_055188 Homo sapiens G protein-coupled receptor 160
In [7]:
data.columns = [re.sub('\.[0-9][0-9]TCGA',"",x)for x in data.columns]
clinical['Complete TCGA ID'] = [re.sub('TCGA\-',"",x) for x in clinical['Complete TCGA ID']]
In [8]:
clinical.head()
Out[8]:
Complete TCGA ID Gender Age at Initial Pathologic Diagnosis ER Status PR Status HER2 Final Status Tumor Tumor--T1 Coded Node Node-Coded ... PAM50 mRNA SigClust Unsupervised mRNA SigClust Intrinsic mRNA miRNA Clusters methylation Clusters RPPA Clusters CN Clusters Integrated Clusters (with PAM50) Integrated Clusters (no exp) Integrated Clusters (unsup exp)
0 A2-A0T2 FEMALE 66 Negative Negative Negative T3 T_Other N3 Positive ... Basal-like 0 -13 3 5 Basal 3 2 2 2
1 A2-A0CM FEMALE 40 Negative Negative Negative T2 T_Other N0 Negative ... Basal-like -12 -13 4 4 Basal 4 2 1 1
2 BH-A18V FEMALE 48 Negative Negative Negative T2 T_Other N1 Positive ... Basal-like -12 -13 5 5 Basal 1 2 2 2
3 BH-A18Q FEMALE 56 Negative Negative Negative T2 T_Other N1 Positive ... Basal-like -12 -13 5 5 Basal 1 2 2 2
4 BH-A0E0 FEMALE 38 Negative Negative Negative T3 T_Other N3 Positive ... Basal-like 0 -13 5 5 Basal 1 2 2 2

5 rows × 30 columns

In [9]:
clinical_dict = dict(zip(clinical['Complete TCGA ID'],clinical['Tumor']))
In [10]:
clinical_dict[data.columns[-3]]='Healthy'
clinical_dict[data.columns[-2]]='Healthy'
clinical_dict[data.columns[-1]]='Healthy'
In [11]:
data_raw = data[data.columns[3:len(data.columns)]].T
data_raw.head()
Out[11]:
0 1 2 3 4 5 6 7 8 9 ... 12543 12544 12545 12546 12547 12548 12549 12550 12551 12552
AO-A12D 1.096131 1.111370 1.111370 1.107561 1.115180 1.107561 1.111370 1.111370 -1.517390 0.482754 ... NaN NaN NaN -0.340163 NaN NaN NaN NaN -0.633517 12.666488
AO-A12D 1.100688 1.100688 1.100688 1.100688 1.093358 1.097023 1.097023 1.097023 -2.413909 0.543630 ... NaN NaN 0.737867 0.129501 NaN -2.578828 NaN 1.294925 -0.189341 13.066445
C8-A131 2.609943 2.650422 2.650422 2.646374 2.646374 2.646374 2.650422 2.650422 3.909313 -1.045294 ... NaN NaN NaN 3.451902 NaN NaN NaN NaN 4.840325 0.140736
C8-A131 2.707250 2.733832 2.737629 2.733832 2.752819 2.737629 2.737629 2.737629 4.089502 -1.120524 ... -4.90273 NaN NaN 3.352807 NaN -2.130632 NaN NaN 2.027516 NaN
AO-A12B -0.659828 -0.648742 -0.654285 -0.632113 -0.640428 -0.654285 -0.648742 -0.648742 -0.618256 1.222003 ... NaN NaN NaN -1.718531 NaN NaN NaN NaN -1.965192 -2.854835

5 rows × 12553 columns

In [12]:
import numpy as np
In [13]:
Impute = SimpleImputer(missing_values=np.NaN,strategy='mean')
Impute.fit(data_raw)
dataX = Impute.transform(data_raw)
In [14]:
import matplotlib.pyplot as plt
In [15]:
SampleIntensities = data_raw.sum(axis=0)
SampleDist = plt.hist(SampleIntensities.values)
plt.title('Sample Intensity Distribution')
plt.show()
In [16]:
from sklearn import preprocessing
In [17]:
for inputs in range(len(dataX.T)):
  dataX.T[inputs] = preprocessing.scale(dataX.T[inputs])
In [18]:
SampleIntensities2 = dataX.sum(axis=0)
SampleDist2 = plt.hist(SampleIntensities2)
plt.title('Sample Intensities after Preprocessing')
plt.show()
In [19]:
IntermedSet = [clinical_dict[x] for x in list(data_raw.index)]
In [20]:
from sklearn.decomposition import PCA
In [21]:
pca = PCA(n_components=5)
dataX_pca = pca.fit(dataX)
dataX_pca2 = dataX_pca.transform(dataX)
In [22]:
a = pd.DataFrame(dataX_pca2)
b = pd.DataFrame(IntermedSet)
frames = [a,b]
result = pd.concat(frames,axis=1)
In [23]:
result.columns = ['pca_1','pca_2','pca_3','pca_4','pca_5','tumor']
In [24]:
result.head()
Out[24]:
pca_1 pca_2 pca_3 pca_4 pca_5 tumor
0 -56.618994 -10.946011 18.128681 64.435936 -2.825934 T1
1 -54.922089 -4.936564 19.861284 67.296282 0.361641 T1
2 -38.913022 -44.208030 9.101419 28.339622 -24.933006 T2
3 -34.776548 -49.080334 9.162740 26.980669 -25.420639 T2
4 32.028614 75.862568 -10.883569 18.301868 -41.203688 T2
In [25]:
result.shape
Out[25]:
(89, 6)
In [26]:
result.columns
Out[26]:
Index(['pca_1', 'pca_2', 'pca_3', 'pca_4', 'pca_5', 'tumor'], dtype='object')
In [27]:
X1 = result[['pca_1','pca_2','pca_3','pca_4','pca_5']]
y1 = result[['tumor']]
In [28]:
X1
Out[28]:
pca_1 pca_2 pca_3 pca_4 pca_5
0 -56.618994 -10.946011 18.128681 64.435936 -2.825934
1 -54.922089 -4.936564 19.861284 67.296282 0.361641
2 -38.913022 -44.208030 9.101419 28.339622 -24.933006
3 -34.776548 -49.080334 9.162740 26.980669 -25.420639
4 32.028614 75.862568 -10.883569 18.301868 -41.203688
... ... ... ... ... ...
84 30.904487 11.933101 -5.117084 -5.894893 -8.078842
85 2.747390 -43.058071 -14.540878 -42.656499 -18.219339
86 39.828947 -28.079847 -55.714679 -6.535952 -22.320070
87 47.540501 1.962466 -10.039702 15.652749 -12.786465
88 33.265525 -35.208994 -48.020931 8.375338 -31.905170

89 rows × 5 columns

In [29]:
y1
Out[29]:
tumor
0 T1
1 T1
2 T2
3 T2
4 T2
... ...
84 T2
85 T1
86 Healthy
87 Healthy
88 Healthy

89 rows × 1 columns

In [30]:
from sklearn.model_selection import train_test_split
In [31]:
X_train,X_test,y_train,y_test = train_test_split(X1,y1,test_size=0.2)
X_train1,X_test1,y_train1,y_test1 = train_test_split(X1,y1,test_size=0.2)
In [32]:
i = ['tumor']
y_train1 = pd.get_dummies(y_train1,columns=i,drop_first=False)
y_test1 = pd.get_dummies(y_test1,columns=i,drop_first=False)
In [33]:
pip install chart-studio
Requirement already satisfied: chart-studio in /usr/local/lib/python3.9/site-packages (1.1.0)
Requirement already satisfied: six in /usr/local/lib/python3.9/site-packages (from chart-studio) (1.15.0)
Requirement already satisfied: requests in /usr/local/lib/python3.9/site-packages (from chart-studio) (2.25.1)
Requirement already satisfied: retrying>=1.3.3 in /usr/local/lib/python3.9/site-packages (from chart-studio) (1.3.3)
Requirement already satisfied: plotly in /usr/local/lib/python3.9/site-packages (from chart-studio) (4.14.3)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/site-packages (from requests->chart-studio) (1.26.5)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/site-packages (from requests->chart-studio) (2020.12.5)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.9/site-packages (from requests->chart-studio) (2.10)
Requirement already satisfied: chardet<5,>=3.0.2 in /usr/local/lib/python3.9/site-packages (from requests->chart-studio) (4.0.0)
WARNING: You are using pip version 21.1.1; however, version 21.1.2 is available.
You should consider upgrading via the '/usr/local/opt/python@3.9/bin/python3.9 -m pip install --upgrade pip' command.
Note: you may need to restart the kernel to use updated packages.
In [34]:
from chart_studio import plotly
In [35]:
import plotly.offline as pyoff
import plotly.graph_objs as go
In [36]:
x = y1.tumor
da = [go.Histogram(x=x)]

layout = go.Layout(title='Type of tumor count',xaxis=dict(title='Types of Tumor'),yaxis=dict(title='Count'),bargap=0.2,bargroupgap=0.1)
fig = go.Figure(data=da,layout=layout)
pyoff.iplot(fig,filename='styled histogram')
In [37]:
x = clinical['Age at Initial Pathologic Diagnosis']
da = [go.Histogram(x=x)]

layout = go.Layout(title='Distribution of Age when Breast Cancer was diagnosed',
                   xaxis=dict(title='Age at initial pathalogical diagnosis'),
                   yaxis=dict(title='Count'),
                   bargap=0.2,
                   bargroupgap=0.1)
fig = go.Figure(data=da,layout=layout)
pyoff.iplot(fig,filename='styled histogram')
In [38]:
x = clinical['ER Status']
da = [go.Histogram(x=x)]

layout = go.Layout(title='Estrogen Status',
                   xaxis=dict(title='ER Status'),
                   yaxis=dict(title='Count'),
                   bargap=0.2,
                   bargroupgap=0.1)
fig = go.Figure(data=da,layout=layout)
pyoff.iplot(fig,filename='styled histogram')
In [39]:
x = clinical['PR Status']
da = [go.Histogram(x=x)]

layout = go.Layout(title='Postegene Status',
                   xaxis=dict(title='PR Status'),
                   yaxis=dict(title='Count'),
                   bargap=0.2,
                   bargroupgap=0.1)
fig = go.Figure(data=da,layout=layout)
pyoff.iplot(fig,filename='styled histogram')
In [40]:
x = clinical['HER2 Final Status']
da = [go.Histogram(x=x)]

layout = go.Layout(title='HER2_FINAL Status',
                   xaxis=dict(title='HER2_FINAL Status'),
                   yaxis=dict(title='Count'),
                   bargap=0.2,
                   bargroupgap=0.1)
fig = go.Figure(data=da,layout=layout)
pyoff.iplot(fig,filename='styled histogram')
In [41]:
x = clinical['AJCC Stage']
da = [go.Histogram(x=x)]

layout = go.Layout(title='AJCC Stage',
                   xaxis=dict(title='AJCC Stage'),
                   yaxis=dict(title='Count'),
                   bargap=0.2,
                   bargroupgap=0.1)
fig = go.Figure(data=da,layout=layout)
pyoff.iplot(fig,filename='styled histogram')
In [42]:
x = clinical['Vital Status']
da = [go.Histogram(x=x)]

layout = go.Layout(title='Vital Status',
                   xaxis=dict(title='Vital Status'),
                   yaxis=dict(title='Count'),
                   bargap=0.2,
                   bargroupgap=0.1)
fig = go.Figure(data=da,layout=layout)
pyoff.iplot(fig,filename='styled histogram')
In [43]:
from sklearn.preprocessing import label_binarize
from sklearn import svm
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import confusion_matrix,roc_curve,auc,classification_report,accuracy_score
In [44]:
X_train,X_test,y_train,y_test = train_test_split(X1,y1,test_size=0.2,random_state=1)
In [45]:
def DeepLearning():
    global y_train
    import tensorflow_addons as tfa
    from sklearn.metrics import f1_score
    hiddenunits=[7]
    def convert_target_to_int(y):
        y = y.values.tolist()
        y = [item for sublist in y for item in sublist]
        y = np.where(y=='T1',0,y)
        y = np.where(y=='T2',1,y)
        y = np.where(y=='T3',2,y)
        y = np.where(y=='T4',3,y)
        y = np.where(y=='T1',0,y)
        y = np.where(y=='Healthy',4,y)
        y = [int(item) for item in y]
        y = pd.DataFrame(y)
        return y

    y_train_int = convert_target_to_int(y_train)
    y_test_int = convert_target_to_int(y_test)
    for i in hiddenunits:
        model=tf.keras.models.Sequential()
        model.add(tf.keras.layers.Dropout(0.2))
        model.add(tf.keras.layers.Dense(i,activation=tf.nn.relu))
        model.add(tf.keras.layers.Dense(i,activation=tf.nn.relu))
        model.add(tf.keras.layers.Dense(5,activation=tf.nn.softmax))
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',metrics=['accuracy'])
        history = model.fit(X_train, y_train_int, epochs=150, validation_split = 0.2, verbose = 2,batch_size=5)
        ret = model.predict(X_test)
        ret = np.argmax(ret, axis=1)
        # print("accuracy score: ",accuracy_score(y_test_int, ret))
        # print("predictions are: ",ret)
        # print("actual values are: ",y_test)
        # print("hidden units = ")
        # print(i)
        # print("\n")
        accuracy = history.history['val_accuracy']
        # print("accuracy is: ",accuracy[len(accuracy)-1])
        # print('confusion matrix: ',tf.math.confusion_matrix(y_test_int, ret))
        # print(y_test_int, ret)
        # print("f1 score: ",f1_score(y_test_int, ret, average='weighted'))
        return accuracy[len(accuracy)-1]*100
In [46]:
def MLP():
    from sklearn.neural_network import MLPClassifier
    clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
                        hidden_layer_sizes=(20, 10, 5, 2), random_state=10)

    clf.fit(X_train, y_train.values.ravel())
    pred = clf.predict(X_test)
    # print("Accuracy of MLPClassifier is: ", accuracy_score(y_test,pred)*100,"%")
    return accuracy_score(y_test,pred)*100
In [47]:
def SVM():
    clf = svm.SVC(C=10,gamma='scale',kernel='linear')
    clf.fit(X_train,y_train.values.ravel())
    predicted_svm_y = clf.predict(X_test)
    # print("Accuracy of SVM Classifier: ",accuracy_score(y_test,predicted_svm_y)*100,"%")
    return accuracy_score(y_test,predicted_svm_y)*100
In [48]:
def RandomForestClassifier():
    from sklearn.ensemble import RandomForestClassifier


    model = RandomForestClassifier(n_estimators=100,criterion='entropy',random_state=190,ccp_alpha=1.5,
                                bootstrap = True,
                                max_features = 'auto')

    model.fit(X_train, y_train.values.ravel())
    rf_predictions = model.predict(X_test)
    # print("Accuracy of Random Forest Classifier: ",accuracy_score(y_test,rf_predictions)*100,"%")
    return accuracy_score(y_test,rf_predictions)*100
In [49]:
def LogisticRegression():
    from sklearn.linear_model import LogisticRegression
    logreg = LogisticRegression(random_state=1,max_iter=500,solver='newton-cg')
    logreg.fit(X_train, y_train.values.ravel())
    lr_predictions = logreg.predict(X_test)
    # print("Accuracy of Logistic Regression: ",accuracy_score(y_test,lr_predictions)*100,"%")
    return accuracy_score(y_test,lr_predictions)*100
In [50]:
def KNeighborsClassifier():
    from sklearn.neighbors import KNeighborsClassifier  
    classifier= KNeighborsClassifier(n_neighbors=5, metric='minkowski',p=2)  
    classifier.fit(X_train, y_train.values.ravel())
    y_pred2= classifier.predict(X_test)  
    # print("Accuracy of KNN: ",accuracy_score(y_test,y_pred2)*100,"%")
    return accuracy_score(y_test,y_pred2)*100
In [51]:
def GradientBoostingClassifier():
    from sklearn.datasets import make_hastie_10_2
    from sklearn.ensemble import GradientBoostingClassifier
    clf = GradientBoostingClassifier(n_estimators=550, learning_rate=5, max_depth=5, random_state=110)
    clf.fit(X_train, y_train.values.ravel())
    gbc_pred = clf.predict(X_test)
    # print("Accuracy of Gradient Boosting Classifier: ",accuracy_score(y_test,y_pred2)*100,"%")
    return accuracy_score(y_test,y_pred2)*100
In [52]:
def plot_comparision(selected_models):
    model_names = []
    accuracies = []
    for model in selected_models:
        if model is "DeepLearning":
            model_names.append("Deep Learning")
            accuracies.append(DeepLearning())
            print()
        elif model is "MLP":
            model_names.append("MLP")
            accuracies.append(MLP())
        elif model is "KNeighborsClassifier":
            model_names.append("KNeighborsClassifier")
            accuracies.append(KNeighborsClassifier())
        elif model is "KNeighborsClassifier":
            model_names.append("KNeighborsClassifier")
            accuracies.append(KNeighborsClassifier())
        elif model is "LogisticRegression":
            model_names.append("LogisticRegression")
            accuracies.append(LogisticRegression())
        elif model is "RandomForestClassifier":
            model_names.append("RandomForestClassifier")
            accuracies.append(RandomForestClassifier())
        elif model is "SVM":
            model_names.append("SVM")
            accuracies.append(SVM())
    plt.barh(model_names, accuracies, align='center', label="Data 1")
    plt.legend()

    plt.ylabel('Models')
    plt.xlabel('Accuracies')
    plt.title('Comparision of various Models')

    plt.show()
plot_comparision(["DeepLearning", "MLP", "KNeighborsClassifier","LogisticRegression","RandomForestClassifier","GradientBoostingClassifier","SVM"])
<>:5: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:9: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:12: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:15: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:18: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:21: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:24: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:5: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:9: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:12: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:15: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:18: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:21: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<>:24: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<ipython-input-1-8c36848c9ba7>:5: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<ipython-input-1-8c36848c9ba7>:9: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<ipython-input-1-8c36848c9ba7>:12: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<ipython-input-1-8c36848c9ba7>:15: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<ipython-input-1-8c36848c9ba7>:18: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<ipython-input-1-8c36848c9ba7>:21: SyntaxWarning:

"is" with a literal. Did you mean "=="?

<ipython-input-1-8c36848c9ba7>:24: SyntaxWarning:

"is" with a literal. Did you mean "=="?

Epoch 1/150
12/12 - 1s - loss: 15.6269 - accuracy: 0.1250 - val_loss: 18.0582 - val_accuracy: 0.0667
Epoch 2/150
12/12 - 0s - loss: 16.6093 - accuracy: 0.1250 - val_loss: 16.6634 - val_accuracy: 0.0667
Epoch 3/150
12/12 - 0s - loss: 14.8037 - accuracy: 0.0714 - val_loss: 15.1888 - val_accuracy: 0.0667
Epoch 4/150
12/12 - 0s - loss: 16.3802 - accuracy: 0.1071 - val_loss: 13.6984 - val_accuracy: 0.0667
Epoch 5/150
12/12 - 0s - loss: 13.5016 - accuracy: 0.1250 - val_loss: 12.2463 - val_accuracy: 0.0667
Epoch 6/150
12/12 - 0s - loss: 10.5585 - accuracy: 0.1071 - val_loss: 11.1713 - val_accuracy: 0.0667
Epoch 7/150
12/12 - 0s - loss: 12.4309 - accuracy: 0.1429 - val_loss: 10.0715 - val_accuracy: 0.0667
Epoch 8/150
12/12 - 0s - loss: 11.1985 - accuracy: 0.2143 - val_loss: 9.0979 - val_accuracy: 0.1333
Epoch 9/150
12/12 - 0s - loss: 8.6368 - accuracy: 0.1786 - val_loss: 8.2221 - val_accuracy: 0.1333
Epoch 10/150
12/12 - 0s - loss: 7.8206 - accuracy: 0.2321 - val_loss: 7.5484 - val_accuracy: 0.1333
Epoch 11/150
12/12 - 0s - loss: 7.5196 - accuracy: 0.2500 - val_loss: 6.8256 - val_accuracy: 0.1333
Epoch 12/150
12/12 - 0s - loss: 7.4277 - accuracy: 0.2857 - val_loss: 6.1807 - val_accuracy: 0.1333
Epoch 13/150
12/12 - 0s - loss: 7.5433 - accuracy: 0.2679 - val_loss: 5.5149 - val_accuracy: 0.1333
Epoch 14/150
12/12 - 0s - loss: 8.2846 - accuracy: 0.2500 - val_loss: 5.0119 - val_accuracy: 0.1333
Epoch 15/150
12/12 - 0s - loss: 7.3497 - accuracy: 0.2143 - val_loss: 4.5162 - val_accuracy: 0.2000
Epoch 16/150
12/12 - 0s - loss: 6.3886 - accuracy: 0.3214 - val_loss: 3.9953 - val_accuracy: 0.2667
Epoch 17/150
12/12 - 0s - loss: 5.7740 - accuracy: 0.3036 - val_loss: 3.6376 - val_accuracy: 0.2667
Epoch 18/150
12/12 - 0s - loss: 5.5269 - accuracy: 0.2321 - val_loss: 3.3795 - val_accuracy: 0.3333
Epoch 19/150
12/12 - 0s - loss: 5.6423 - accuracy: 0.3036 - val_loss: 3.1690 - val_accuracy: 0.4667
Epoch 20/150
12/12 - 0s - loss: 4.6979 - accuracy: 0.3571 - val_loss: 2.9919 - val_accuracy: 0.5333
Epoch 21/150
12/12 - 0s - loss: 4.5530 - accuracy: 0.3750 - val_loss: 2.8455 - val_accuracy: 0.5333
Epoch 22/150
12/12 - 0s - loss: 4.1480 - accuracy: 0.3750 - val_loss: 2.7260 - val_accuracy: 0.6000
Epoch 23/150
12/12 - 0s - loss: 4.3345 - accuracy: 0.3571 - val_loss: 2.6272 - val_accuracy: 0.6000
Epoch 24/150
12/12 - 0s - loss: 3.7067 - accuracy: 0.4643 - val_loss: 2.5781 - val_accuracy: 0.6000
Epoch 25/150
12/12 - 0s - loss: 4.0163 - accuracy: 0.3571 - val_loss: 2.4764 - val_accuracy: 0.6000
Epoch 26/150
12/12 - 0s - loss: 3.5682 - accuracy: 0.3393 - val_loss: 2.4233 - val_accuracy: 0.6000
Epoch 27/150
12/12 - 0s - loss: 2.5443 - accuracy: 0.3929 - val_loss: 2.3537 - val_accuracy: 0.6000
Epoch 28/150
12/12 - 0s - loss: 3.8238 - accuracy: 0.3393 - val_loss: 2.2937 - val_accuracy: 0.6000
Epoch 29/150
12/12 - 0s - loss: 3.7637 - accuracy: 0.4107 - val_loss: 2.2080 - val_accuracy: 0.6000
Epoch 30/150
12/12 - 0s - loss: 3.6419 - accuracy: 0.3214 - val_loss: 2.0710 - val_accuracy: 0.6000
Epoch 31/150
12/12 - 0s - loss: 2.5963 - accuracy: 0.4464 - val_loss: 2.0174 - val_accuracy: 0.6000
Epoch 32/150
12/12 - 0s - loss: 2.5204 - accuracy: 0.4821 - val_loss: 1.9598 - val_accuracy: 0.6000
Epoch 33/150
12/12 - 0s - loss: 3.0285 - accuracy: 0.5357 - val_loss: 1.9305 - val_accuracy: 0.6000
Epoch 34/150
12/12 - 0s - loss: 2.8167 - accuracy: 0.5000 - val_loss: 1.9256 - val_accuracy: 0.6000
Epoch 35/150
12/12 - 0s - loss: 2.5085 - accuracy: 0.3571 - val_loss: 1.8564 - val_accuracy: 0.6000
Epoch 36/150
12/12 - 0s - loss: 2.7305 - accuracy: 0.4107 - val_loss: 1.8115 - val_accuracy: 0.6000
Epoch 37/150
12/12 - 0s - loss: 2.6943 - accuracy: 0.5179 - val_loss: 1.7831 - val_accuracy: 0.6000
Epoch 38/150
12/12 - 0s - loss: 2.5724 - accuracy: 0.4286 - val_loss: 1.7669 - val_accuracy: 0.6000
Epoch 39/150
12/12 - 0s - loss: 2.4570 - accuracy: 0.5000 - val_loss: 1.8204 - val_accuracy: 0.6000
Epoch 40/150
12/12 - 0s - loss: 2.1943 - accuracy: 0.4107 - val_loss: 1.7957 - val_accuracy: 0.6000
Epoch 41/150
12/12 - 0s - loss: 2.7350 - accuracy: 0.4643 - val_loss: 1.6607 - val_accuracy: 0.6000
Epoch 42/150
12/12 - 0s - loss: 2.4654 - accuracy: 0.4821 - val_loss: 1.5707 - val_accuracy: 0.6000
Epoch 43/150
12/12 - 0s - loss: 2.2013 - accuracy: 0.4464 - val_loss: 1.5208 - val_accuracy: 0.6000
Epoch 44/150
12/12 - 0s - loss: 2.5386 - accuracy: 0.4286 - val_loss: 1.4665 - val_accuracy: 0.6667
Epoch 45/150
12/12 - 0s - loss: 1.9449 - accuracy: 0.5179 - val_loss: 1.4531 - val_accuracy: 0.6667
Epoch 46/150
12/12 - 0s - loss: 1.7716 - accuracy: 0.4643 - val_loss: 1.4655 - val_accuracy: 0.6667
Epoch 47/150
12/12 - 0s - loss: 1.7861 - accuracy: 0.4821 - val_loss: 1.4774 - val_accuracy: 0.6667
Epoch 48/150
12/12 - 0s - loss: 2.0968 - accuracy: 0.4464 - val_loss: 1.3875 - val_accuracy: 0.6667
Epoch 49/150
12/12 - 0s - loss: 2.2185 - accuracy: 0.3929 - val_loss: 1.3363 - val_accuracy: 0.6667
Epoch 50/150
12/12 - 0s - loss: 2.1025 - accuracy: 0.4107 - val_loss: 1.2878 - val_accuracy: 0.6667
Epoch 51/150
12/12 - 0s - loss: 1.8274 - accuracy: 0.4821 - val_loss: 1.2458 - val_accuracy: 0.6667
Epoch 52/150
12/12 - 0s - loss: 1.8549 - accuracy: 0.4107 - val_loss: 1.2801 - val_accuracy: 0.6667
Epoch 53/150
12/12 - 0s - loss: 1.8214 - accuracy: 0.5179 - val_loss: 1.2903 - val_accuracy: 0.6667
Epoch 54/150
12/12 - 0s - loss: 1.6438 - accuracy: 0.4821 - val_loss: 1.2646 - val_accuracy: 0.6667
Epoch 55/150
12/12 - 0s - loss: 1.7011 - accuracy: 0.5893 - val_loss: 1.2105 - val_accuracy: 0.6667
Epoch 56/150
12/12 - 0s - loss: 1.7474 - accuracy: 0.5893 - val_loss: 1.2034 - val_accuracy: 0.6667
Epoch 57/150
12/12 - 0s - loss: 1.6338 - accuracy: 0.4821 - val_loss: 1.2112 - val_accuracy: 0.6667
Epoch 58/150
12/12 - 0s - loss: 1.5853 - accuracy: 0.4643 - val_loss: 1.2127 - val_accuracy: 0.6667
Epoch 59/150
12/12 - 0s - loss: 1.4272 - accuracy: 0.5357 - val_loss: 1.1687 - val_accuracy: 0.6667
Epoch 60/150
12/12 - 0s - loss: 1.5742 - accuracy: 0.5357 - val_loss: 1.1416 - val_accuracy: 0.6667
Epoch 61/150
12/12 - 0s - loss: 1.5422 - accuracy: 0.5000 - val_loss: 1.1033 - val_accuracy: 0.6667
Epoch 62/150
12/12 - 0s - loss: 1.6495 - accuracy: 0.5357 - val_loss: 1.0774 - val_accuracy: 0.6667
Epoch 63/150
12/12 - 0s - loss: 1.4378 - accuracy: 0.5714 - val_loss: 1.0406 - val_accuracy: 0.6667
Epoch 64/150
12/12 - 0s - loss: 1.6012 - accuracy: 0.5357 - val_loss: 1.0200 - val_accuracy: 0.6667
Epoch 65/150
12/12 - 0s - loss: 1.6068 - accuracy: 0.5179 - val_loss: 0.9842 - val_accuracy: 0.6667
Epoch 66/150
12/12 - 0s - loss: 1.1494 - accuracy: 0.6250 - val_loss: 0.9630 - val_accuracy: 0.7333
Epoch 67/150
12/12 - 0s - loss: 1.3783 - accuracy: 0.5536 - val_loss: 1.0069 - val_accuracy: 0.7333
Epoch 68/150
12/12 - 0s - loss: 1.5656 - accuracy: 0.5893 - val_loss: 1.0101 - val_accuracy: 0.7333
Epoch 69/150
12/12 - 0s - loss: 1.3575 - accuracy: 0.5357 - val_loss: 1.0485 - val_accuracy: 0.7333
Epoch 70/150
12/12 - 0s - loss: 1.2307 - accuracy: 0.6071 - val_loss: 1.0226 - val_accuracy: 0.7333
Epoch 71/150
12/12 - 0s - loss: 1.2908 - accuracy: 0.5357 - val_loss: 0.9768 - val_accuracy: 0.7333
Epoch 72/150
12/12 - 0s - loss: 1.3027 - accuracy: 0.5536 - val_loss: 0.9260 - val_accuracy: 0.8000
Epoch 73/150
12/12 - 0s - loss: 1.3054 - accuracy: 0.5179 - val_loss: 0.8716 - val_accuracy: 0.8000
Epoch 74/150
12/12 - 0s - loss: 1.2338 - accuracy: 0.5714 - val_loss: 0.8702 - val_accuracy: 0.8000
Epoch 75/150
12/12 - 0s - loss: 1.1644 - accuracy: 0.6250 - val_loss: 0.8223 - val_accuracy: 0.8667
Epoch 76/150
12/12 - 0s - loss: 1.2032 - accuracy: 0.5714 - val_loss: 0.8314 - val_accuracy: 0.8667
Epoch 77/150
12/12 - 0s - loss: 1.1275 - accuracy: 0.6250 - val_loss: 0.8151 - val_accuracy: 0.8667
Epoch 78/150
12/12 - 0s - loss: 1.2519 - accuracy: 0.5893 - val_loss: 0.7580 - val_accuracy: 0.8667
Epoch 79/150
12/12 - 0s - loss: 1.2410 - accuracy: 0.5357 - val_loss: 0.7441 - val_accuracy: 0.8667
Epoch 80/150
12/12 - 0s - loss: 1.1261 - accuracy: 0.6071 - val_loss: 0.7726 - val_accuracy: 0.8667
Epoch 81/150
12/12 - 0s - loss: 1.2945 - accuracy: 0.5714 - val_loss: 0.8692 - val_accuracy: 0.7333
Epoch 82/150
12/12 - 0s - loss: 1.4732 - accuracy: 0.5893 - val_loss: 0.8312 - val_accuracy: 0.8000
Epoch 83/150
12/12 - 0s - loss: 1.2865 - accuracy: 0.5714 - val_loss: 0.8036 - val_accuracy: 0.8667
Epoch 84/150
12/12 - 0s - loss: 1.4767 - accuracy: 0.5357 - val_loss: 0.7638 - val_accuracy: 0.8667
Epoch 85/150
12/12 - 0s - loss: 1.3590 - accuracy: 0.6071 - val_loss: 0.7208 - val_accuracy: 0.8667
Epoch 86/150
12/12 - 0s - loss: 1.2526 - accuracy: 0.5893 - val_loss: 0.7062 - val_accuracy: 0.8667
Epoch 87/150
12/12 - 0s - loss: 1.0863 - accuracy: 0.6429 - val_loss: 0.7191 - val_accuracy: 0.8667
Epoch 88/150
12/12 - 0s - loss: 1.1105 - accuracy: 0.6250 - val_loss: 0.7537 - val_accuracy: 0.8667
Epoch 89/150
12/12 - 0s - loss: 1.0965 - accuracy: 0.5893 - val_loss: 0.8294 - val_accuracy: 0.8000
Epoch 90/150
12/12 - 0s - loss: 1.2534 - accuracy: 0.6250 - val_loss: 0.8122 - val_accuracy: 0.8000
Epoch 91/150
12/12 - 0s - loss: 1.1908 - accuracy: 0.5714 - val_loss: 0.7866 - val_accuracy: 0.8667
Epoch 92/150
12/12 - 0s - loss: 1.2157 - accuracy: 0.5893 - val_loss: 0.7501 - val_accuracy: 0.8667
Epoch 93/150
12/12 - 0s - loss: 1.2491 - accuracy: 0.6071 - val_loss: 0.7383 - val_accuracy: 0.8667
Epoch 94/150
12/12 - 0s - loss: 0.9303 - accuracy: 0.7143 - val_loss: 0.7529 - val_accuracy: 0.8667
Epoch 95/150
12/12 - 0s - loss: 1.1576 - accuracy: 0.5893 - val_loss: 0.7516 - val_accuracy: 0.8667
Epoch 96/150
12/12 - 0s - loss: 1.1619 - accuracy: 0.6071 - val_loss: 0.7393 - val_accuracy: 0.8667
Epoch 97/150
12/12 - 0s - loss: 1.3106 - accuracy: 0.5714 - val_loss: 0.7518 - val_accuracy: 0.8667
Epoch 98/150
12/12 - 0s - loss: 1.1753 - accuracy: 0.6429 - val_loss: 0.7536 - val_accuracy: 0.8667
Epoch 99/150
12/12 - 0s - loss: 1.0395 - accuracy: 0.6607 - val_loss: 0.7352 - val_accuracy: 0.8667
Epoch 100/150
12/12 - 0s - loss: 1.0457 - accuracy: 0.6786 - val_loss: 0.7622 - val_accuracy: 0.8667
Epoch 101/150
12/12 - 0s - loss: 1.1835 - accuracy: 0.6071 - val_loss: 0.7538 - val_accuracy: 0.8667
Epoch 102/150
12/12 - 0s - loss: 1.0810 - accuracy: 0.6607 - val_loss: 0.7602 - val_accuracy: 0.8667
Epoch 103/150
12/12 - 0s - loss: 1.1938 - accuracy: 0.6786 - val_loss: 0.7434 - val_accuracy: 0.8667
Epoch 104/150
12/12 - 0s - loss: 1.1502 - accuracy: 0.6071 - val_loss: 0.6999 - val_accuracy: 0.8667
Epoch 105/150
12/12 - 0s - loss: 1.0286 - accuracy: 0.6607 - val_loss: 0.7087 - val_accuracy: 0.8667
Epoch 106/150
12/12 - 0s - loss: 1.2122 - accuracy: 0.5536 - val_loss: 0.7036 - val_accuracy: 0.8667
Epoch 107/150
12/12 - 0s - loss: 1.0310 - accuracy: 0.6250 - val_loss: 0.6817 - val_accuracy: 0.8667
Epoch 108/150
12/12 - 0s - loss: 1.2842 - accuracy: 0.5357 - val_loss: 0.7070 - val_accuracy: 0.8667
Epoch 109/150
12/12 - 0s - loss: 1.1050 - accuracy: 0.6071 - val_loss: 0.6901 - val_accuracy: 0.8667
Epoch 110/150
12/12 - 0s - loss: 0.9695 - accuracy: 0.6250 - val_loss: 0.7197 - val_accuracy: 0.8667
Epoch 111/150
12/12 - 0s - loss: 1.1110 - accuracy: 0.6071 - val_loss: 0.7139 - val_accuracy: 0.8667
Epoch 112/150
12/12 - 0s - loss: 1.4096 - accuracy: 0.6429 - val_loss: 0.6872 - val_accuracy: 0.8667
Epoch 113/150
12/12 - 0s - loss: 1.2906 - accuracy: 0.5893 - val_loss: 0.7096 - val_accuracy: 0.8667
Epoch 114/150
12/12 - 0s - loss: 1.1264 - accuracy: 0.6250 - val_loss: 0.7140 - val_accuracy: 0.8667
Epoch 115/150
12/12 - 0s - loss: 1.0675 - accuracy: 0.6250 - val_loss: 0.7227 - val_accuracy: 0.8667
Epoch 116/150
12/12 - 0s - loss: 1.1068 - accuracy: 0.5893 - val_loss: 0.7143 - val_accuracy: 0.8667
Epoch 117/150
12/12 - 0s - loss: 1.0467 - accuracy: 0.6250 - val_loss: 0.7380 - val_accuracy: 0.8667
Epoch 118/150
12/12 - 0s - loss: 1.0845 - accuracy: 0.6964 - val_loss: 0.7044 - val_accuracy: 0.8667
Epoch 119/150
12/12 - 0s - loss: 1.0098 - accuracy: 0.6250 - val_loss: 0.6936 - val_accuracy: 0.8667
Epoch 120/150
12/12 - 0s - loss: 0.9956 - accuracy: 0.6786 - val_loss: 0.6940 - val_accuracy: 0.8667
Epoch 121/150
12/12 - 0s - loss: 1.0728 - accuracy: 0.6607 - val_loss: 0.6881 - val_accuracy: 0.8667
Epoch 122/150
12/12 - 0s - loss: 1.0603 - accuracy: 0.6607 - val_loss: 0.7034 - val_accuracy: 0.8667
Epoch 123/150
12/12 - 0s - loss: 0.9893 - accuracy: 0.6607 - val_loss: 0.7269 - val_accuracy: 0.8667
Epoch 124/150
12/12 - 0s - loss: 0.9639 - accuracy: 0.6786 - val_loss: 0.7199 - val_accuracy: 0.8667
Epoch 125/150
12/12 - 0s - loss: 1.0140 - accuracy: 0.6607 - val_loss: 0.7028 - val_accuracy: 0.8667
Epoch 126/150
12/12 - 0s - loss: 0.9704 - accuracy: 0.7143 - val_loss: 0.6930 - val_accuracy: 0.8667
Epoch 127/150
12/12 - 0s - loss: 1.0113 - accuracy: 0.6607 - val_loss: 0.6777 - val_accuracy: 0.8667
Epoch 128/150
12/12 - 0s - loss: 0.9410 - accuracy: 0.6607 - val_loss: 0.6811 - val_accuracy: 0.8667
Epoch 129/150
12/12 - 0s - loss: 1.0320 - accuracy: 0.6250 - val_loss: 0.6562 - val_accuracy: 0.8667
Epoch 130/150
12/12 - 0s - loss: 1.0589 - accuracy: 0.6429 - val_loss: 0.6418 - val_accuracy: 0.8667
Epoch 131/150
12/12 - 0s - loss: 0.9713 - accuracy: 0.7143 - val_loss: 0.6417 - val_accuracy: 0.8667
Epoch 132/150
12/12 - 0s - loss: 0.9688 - accuracy: 0.6964 - val_loss: 0.6563 - val_accuracy: 0.8667
Epoch 133/150
12/12 - 0s - loss: 0.9858 - accuracy: 0.6786 - val_loss: 0.6573 - val_accuracy: 0.8667
Epoch 134/150
12/12 - 0s - loss: 1.1632 - accuracy: 0.6607 - val_loss: 0.6531 - val_accuracy: 0.8667
Epoch 135/150
12/12 - 0s - loss: 1.0256 - accuracy: 0.6786 - val_loss: 0.6571 - val_accuracy: 0.8667
Epoch 136/150
12/12 - 0s - loss: 0.8915 - accuracy: 0.7143 - val_loss: 0.6566 - val_accuracy: 0.8667
Epoch 137/150
12/12 - 0s - loss: 0.9704 - accuracy: 0.6429 - val_loss: 0.6455 - val_accuracy: 0.8667
Epoch 138/150
12/12 - 0s - loss: 1.0017 - accuracy: 0.6607 - val_loss: 0.6182 - val_accuracy: 0.8667
Epoch 139/150
12/12 - 0s - loss: 1.0969 - accuracy: 0.6250 - val_loss: 0.6167 - val_accuracy: 0.8667
Epoch 140/150
12/12 - 0s - loss: 0.9828 - accuracy: 0.6429 - val_loss: 0.6001 - val_accuracy: 0.8667
Epoch 141/150
12/12 - 0s - loss: 0.9271 - accuracy: 0.6964 - val_loss: 0.5700 - val_accuracy: 0.8667
Epoch 142/150
12/12 - 0s - loss: 0.8309 - accuracy: 0.6786 - val_loss: 0.5757 - val_accuracy: 0.8667
Epoch 143/150
12/12 - 0s - loss: 0.9622 - accuracy: 0.6964 - val_loss: 0.6040 - val_accuracy: 0.8667
Epoch 144/150
12/12 - 0s - loss: 0.8952 - accuracy: 0.7321 - val_loss: 0.6198 - val_accuracy: 0.8667
Epoch 145/150
12/12 - 0s - loss: 0.9685 - accuracy: 0.6607 - val_loss: 0.6212 - val_accuracy: 0.8667
Epoch 146/150
12/12 - 0s - loss: 0.9178 - accuracy: 0.6964 - val_loss: 0.6253 - val_accuracy: 0.8667
Epoch 147/150
12/12 - 0s - loss: 0.9136 - accuracy: 0.6786 - val_loss: 0.6109 - val_accuracy: 0.8667
Epoch 148/150
12/12 - 0s - loss: 0.9396 - accuracy: 0.6607 - val_loss: 0.6013 - val_accuracy: 0.8667
Epoch 149/150
12/12 - 0s - loss: 0.8970 - accuracy: 0.7143 - val_loss: 0.6115 - val_accuracy: 0.8667
Epoch 150/150
12/12 - 0s - loss: 0.8831 - accuracy: 0.6964 - val_loss: 0.5973 - val_accuracy: 0.8667

In [ ]: